return (nflags & (GTF_reading|GTF_writing));
}
+/*
+ * Attempt to clear the grant entry @ref so it can be reused.
+ * Returns 1 if the entry was successfully cleared; returns 0 if the
+ * remote domain still has the frame mapped (GTF_reading/GTF_writing
+ * set), in which case the caller must NOT recycle the ref or free the
+ * underlying page.
+ */
-void
+int
gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
nflags = shared[ref].flags;
do {
- if ( (flags = nflags) & (GTF_reading|GTF_writing) )
+ if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ /* Entry is busy: bail out instead of spinning forever. */
+ return 0;
+ }
}
while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
flags);
+
+ return 1;
}
+/*
+ * End foreign access through @ref and, if @page is non-zero, free the
+ * page backing it.  If the grant entry is still in use by the remote
+ * domain, both the ref and the page are deliberately leaked rather
+ * than handed back while still mapped (see the XXX below).
+ */
void
-gnttab_end_foreign_access(grant_ref_t ref, int readonly)
+gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page)
{
- gnttab_end_foreign_access_ref(ref, readonly);
- put_free_entry(ref);
+ if (gnttab_end_foreign_access_ref(ref, readonly)) {
+ put_free_entry(ref);
+ if (page != 0) {
+ free_page(page);
+ }
+ }
+ else {
+ /* XXX This needs to be fixed so that the ref and page are
+ placed on a list to be freed up later. */
+ printk(KERN_WARNING
+ "WARNING: leaking g.e. and page still in use!\n");
+ }
}
int
spin_unlock_irq(&blkif_io_lock);
/* Free resources associated with old device channel. */
- if (info->ring.sring != NULL) {
- free_page((unsigned long)info->ring.sring);
+ if (info->ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ info->ring_ref = GRANT_INVALID_REF;
info->ring.sring = NULL;
}
- if (info->ring_ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(info->ring_ref, 0);
- info->ring_ref = GRANT_INVALID_REF;
if (info->irq)
unbind_evtchn_from_irqhandler(info->irq, info);
info->evtchn = info->irq = 0;
err = HYPERVISOR_event_channel_op(&op);
if (err) {
- gnttab_end_foreign_access(info->ring_ref, 0);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
info->ring_ref = GRANT_INVALID_REF;
- free_page((unsigned long)info->ring.sring);
- info->ring.sring = 0;
+ info->ring.sring = NULL;
xenbus_dev_error(dev, err, "allocating event channel");
return err;
}
int i;
for (i = 0; i < s->req.nr_segments; i++)
gnttab_end_foreign_access(
- blkif_gref_from_fas(s->req.frame_and_sects[i]), 0);
+ blkif_gref_from_fas(s->req.frame_and_sects[i]), 0, 0UL);
}
/*
static void network_tx_buf_gc(struct net_device *dev);
static void network_alloc_rx_buffers(struct net_device *dev);
+static void netif_free(struct netfront_info *info);
+
static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
info->tx_ring_ref = GRANT_INVALID_REF;
info->rx_ring_ref = GRANT_INVALID_REF;
+ info->rx = NULL;
+ info->tx = NULL;
+ info->irq = 0;
info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
if (info->tx == 0) {
return 0;
out:
- if (info->tx)
- free_page((unsigned long)info->tx);
- info->tx = 0;
- if (info->rx)
- free_page((unsigned long)info->rx);
- info->rx = 0;
-
- if (info->tx_ring_ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(info->tx_ring_ref, 0);
- info->tx_ring_ref = GRANT_INVALID_REF;
-
- if (info->rx_ring_ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(info->rx_ring_ref, 0);
- info->rx_ring_ref = GRANT_INVALID_REF;
-
+ netif_free(info);
return err;
}
+/*
+ * Convenience wrapper: end foreign access for @ref (if it is a valid
+ * grant reference) and free @page along with it.  Safe to call with
+ * ref == GRANT_INVALID_REF and/or page == NULL.
+ */
+static void end_access(int ref, void *page)
+{
+ if (ref != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+}
+
static void netif_free(struct netfront_info *info)
{
- if (info->tx)
- free_page((unsigned long)info->tx);
- info->tx = 0;
- if (info->rx)
- free_page((unsigned long)info->rx);
- info->rx = 0;
-
- if (info->tx_ring_ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(info->tx_ring_ref, 0);
+ end_access(info->tx_ring_ref, info->tx);
+ end_access(info->rx_ring_ref, info->rx);
info->tx_ring_ref = GRANT_INVALID_REF;
-
- if (info->rx_ring_ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(info->rx_ring_ref, 0);
info->rx_ring_ref = GRANT_INVALID_REF;
+ info->tx = NULL;
+ info->rx = NULL;
if (info->irq)
unbind_evtchn_from_irqhandler(info->irq, info->netdev);
err = HYPERVISOR_event_channel_op(&op);
if (err) {
- gnttab_end_foreign_access(info->ring_ref, 0);
- free_page((unsigned long)sring);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)sring);
tp->tx = NULL;
xenbus_dev_error(dev, err, "allocating event channel");
return err;
tpmif_set_connected_state(tp,0);
if ( tp->tx != NULL ) {
- gnttab_end_foreign_access(info->ring_ref, 0);
- free_page((unsigned long)tp->tx);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)tp->tx);
tp->tx = NULL;
}
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
int readonly);
-void gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
-void gnttab_end_foreign_access(grant_ref_t ref, int readonly);
+/*
+ * End access through the given grant reference, iff the grant entry is no
+ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
+ * use.
+ */
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+
+/*
+ * Eventually end access through the given grant reference, and once that
+ * access has been ended, free the given page too. Access will be ended
+ * immediately iff the grant entry is not in use, otherwise it will happen
+ * some time later. page may be 0, in which case no freeing will occur.
+ */
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+ unsigned long page);
int gnttab_grant_foreign_transfer(domid_t domid);